void free_perdomain_pt(struct domain *d)
{
- free_xenheap_page((unsigned long)d->mm.perdomain_pt);
+ free_xenheap_page((unsigned long)d->mm_perdomain_pt);
}
+void arch_do_createdomain(struct exec_domain *ed)
+ static void continue_idle_task(struct domain *d)
+ {
+ reset_stack_and_jump(idle_loop);
+ }
+
+ static void continue_nonidle_task(struct domain *d)
+ {
+ reset_stack_and_jump(ret_from_intr);
+ }
+
-void arch_do_createdomain(struct domain *d)
{
- d->shared_info = (void *)alloc_xenheap_page();
- memset(d->shared_info, 0, PAGE_SIZE);
- ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
- d->shared_info->arch.mfn_to_pfn_start =
- virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
- SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
- machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
- PAGE_SHIFT] = INVALID_P2M_ENTRY;
-
- d->mm_perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
- memset(d->mm_perdomain_pt, 0, PAGE_SIZE);
- machine_to_phys_mapping[virt_to_phys(d->mm_perdomain_pt) >>
- PAGE_SHIFT] = INVALID_P2M_ENTRY;
- ed->mm.perdomain_ptes = d->mm_perdomain_pt;
+ struct domain *d = ed->domain;
- d->mm.perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
- memset(d->mm.perdomain_pt, 0, PAGE_SIZE);
- machine_to_phys_mapping[virt_to_phys(d->mm.perdomain_pt) >>
+ #ifdef ARCH_HAS_FAST_TRAP
+ SET_DEFAULT_FAST_TRAP(&d->thread);
+ #endif
+
+ if ( d->id == IDLE_DOMAIN_ID )
+ {
+ d->thread.schedule_tail = continue_idle_task;
+ }
+ else
+ {
+ d->thread.schedule_tail = continue_nonidle_task;
+
+ d->shared_info = (void *)alloc_xenheap_page();
+ memset(d->shared_info, 0, PAGE_SIZE);
++ ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
+ d->shared_info->arch.mfn_to_pfn_start =
+ virt_to_phys(&machine_to_phys_mapping[0])>>PAGE_SHIFT;
+ SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
+ machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
+ PAGE_SHIFT] = INVALID_P2M_ENTRY;
+
- }
++ d->mm_perdomain_pt = (l1_pgentry_t *)alloc_xenheap_page();
++ memset(d->mm_perdomain_pt, 0, PAGE_SIZE);
++ machine_to_phys_mapping[virt_to_phys(d->mm_perdomain_pt) >>
+ PAGE_SHIFT] = INVALID_P2M_ENTRY;
}
-int arch_final_setup_guestos(struct domain *d, full_execution_context_t *c)
+int arch_final_setup_guestos(struct exec_domain *d, full_execution_context_t *c)
{
unsigned long phys_basetab;
int i, rc;
--- /dev/null
-extern void arch_do_createdomain(struct domain *d);
+
+ #ifndef __XEN_DOMAIN_H__
+ #define __XEN_DOMAIN_H__
+
+
+
+ /*
+ * Arch-specifics.
+ */
+
- struct domain *d, full_execution_context_t *c);
++extern void arch_do_createdomain(struct exec_domain *d);
+
+ extern int arch_final_setup_guestos(
++ struct exec_domain *d, full_execution_context_t *c);
+
+ extern void free_perdomain_pt(struct domain *d);
+
+ extern void domain_relinquish_memory(struct domain *d);
+
+ #endif /* __XEN_DOMAIN_H__ */